Every choice we make along our RNA-seq analysis pipeline will affect the results we get and the conclusions we make about the data. Here, we document and explain the choices we make for each critical step in our PDX analysis:
# Load packages
source(here::here('packages.R'))

# Read in the PDX Seurat object and order the treatment factor so downstream
# plots and comparisons run vehicle -> MRD -> relapse.
PDX <- readRDS("data/Izar_2020/Izar_2020_PDX.RDS")
treatment_levels <- c("vehicle", "MRD", "relapse")
PDX$treatment.status <- factor(PDX$treatment.status, levels = treatment_levels)

# Read in gene lists. In the Regev lab cell-cycle file, lines 1-43 are the
# S-phase genes and lines 44-97 are the G2/M genes.
ccgenes <- read_lines("gene_lists/regev_lab_cell_cycle_genes.txt")
s.genes <- ccgenes[1:43]
g2m.genes <- ccgenes[44:97]

# Read in the hallmark gene sets of interest, one "<name>_updated.txt" file
# per hallmark; skip = 1 drops each file's header line.
hallmark_names <- read_lines("gene_lists/hallmarks.txt")
hallmark.list <- vector(mode = "list", length = length(hallmark_names))
names(hallmark.list) <- hallmark_names
for (hm in hallmark_names) {
  hallmark.list[[hm]] <- read_lines(glue("hallmarks/{hm}_updated.txt"), skip = 1)
}

# Create the output directory; recursive = TRUE also creates a missing
# "jesslyn_plots" parent instead of silently failing.
if (!dir.exists("jesslyn_plots/PDX_test")) {
  dir.create("jesslyn_plots/PDX_test", recursive = TRUE)
}
IMPORTANCE: scaling makes count data more comparable across genes and cells, and how we scale our data affects downstream analysis such as dimensional reduction (PCA).
# Scale the PDX data in the four possible ways (scale and/or center) so we can
# compare their effect on downstream analysis. TRUE/FALSE spelled out instead
# of the reassignable T/F shorthands.
PDX_orig <- ScaleData(PDX, do.scale = FALSE, do.center = FALSE)
PDX_center <- ScaleData(PDX, do.scale = FALSE, do.center = TRUE)
PDX_scale <- ScaleData(PDX, do.scale = TRUE, do.center = FALSE)
PDX_scale_center <- ScaleData(PDX, do.scale = TRUE, do.center = TRUE)
# COMPARE EACH SCENARIO THROUGH VISUALIZATION

# Compute and plot the mean expression per cell in each scenario.
# data.frame() converts "scale-center" to the syntactic name "scale.center".
df <- data.frame(
  "orig" = colMeans(PDX_orig[["RNA"]]@scale.data),
  "center" = colMeans(PDX_center[["RNA"]]@scale.data),
  "scale" = colMeans(PDX_scale[["RNA"]]@scale.data),
  "scale-center" = colMeans(PDX_scale_center[["RNA"]]@scale.data)
)
plot.df <- reshape2::melt(df)
p1 <- ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "PDX data norm type", y = "mean expression/cell", title = "expression/cell (Plot #1)") +
  theme_bw()
# Compute and plot the mean expression per gene in each scenario; centering
# should pull these means to ~0 wherever do.center = TRUE was used.
df <- data.frame(
  "orig" = rowMeans(PDX_orig[["RNA"]]@scale.data),
  "center" = rowMeans(PDX_center[["RNA"]]@scale.data),
  "scale" = rowMeans(PDX_scale[["RNA"]]@scale.data),
  "scale-center" = rowMeans(PDX_scale_center[["RNA"]]@scale.data)
)
plot.df <- reshape2::melt(df)
p2 <- ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "PDX data norm type", y = "mean expression/gene", title = "evaluate centering (Plot #2)") +
  ylim(-5, 10) +
  theme_bw()
# Compute and plot the per-gene standard deviation across cells in each
# scenario; scaling should push these toward 1 wherever do.scale = TRUE.
sd.df <- data.frame(
  "orig" = apply(PDX_orig[["RNA"]]@scale.data, 1, sd),
  "center" = apply(PDX_center[["RNA"]]@scale.data, 1, sd),
  "scale" = apply(PDX_scale[["RNA"]]@scale.data, 1, sd),
  "scale-center" = apply(PDX_scale_center[["RNA"]]@scale.data, 1, sd)
)
plot.df <- reshape2::melt(sd.df)
p3 <- ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "PDX data norm type", y = "SD/gene", title = "evaluate scaling (Plot #3)") +
  theme_bw()

# Display the three panels side by side; ggsave() then saves the last
# displayed plot, so the order of these two lines matters.
p1 + p2 + p3 + patchwork::plot_layout(nrow = 1)
ggsave(filename = "PDX_data_scaletest.png", path = "jesslyn_plots/PDX_test", width = 15, height = 5)
We previously decided the parameters for ScaleData (do.scale = do.center = T). Now, we’d like to see the effect of scaling before subsetting vs. scaling each model individually after subsetting by model, to decide which scenario is best.
QUESTION: Should we scale our data before subsetting by model, or should we subset by model first?
IMPORTANCE: We are interested in comparing gene expression between treatment conditions within each model. Considering that the differences between models are so drastic, using count data that is scaled so that it is comparable across genes across models might mask the smaller differences between treatment conditions within a specific model. It is therefore a significant step to make sure that our count data in each model is scaled so that it is comparable across all cells within a specific model, instead of across models. We also want to center the average expression of each gene at 0.
HYPOTHESIS: We hypothesize that we should subset by model first, before scaling the data in each model separately, so that the data for each model would be scaled across all cells within the specific model itself.
# Scenario 1: scale first then subset (PDX -> scale -> subset)
scale_center_DF20 <- subset(PDX_scale_center, subset = (model_ID == "DF20"))
# Scenario 2: subset first, then scale (PDX -> subset -> scale)
DF20 <- subset(PDX, subset = model_ID == "DF20")
DF20_scale_center <- ScaleData(DF20, do.scale = TRUE, do.center = TRUE)
# Scenario 3: scale first, subset, and scale again (PDX -> scale -> subset -> scale)
scale_center_DF20_scale_center <- ScaleData(scale_center_DF20, do.scale = TRUE, do.center = TRUE)
# COMPARE EACH SCENARIO THROUGH VISUALIZATION

# Mean expression per cell within model DF20. data.frame() turns the dashed
# names into syntactic ones (e.g. "scale.center.DF20").
cell.mean.df <- data.frame(
  "scale-center-DF20" = colMeans(scale_center_DF20[["RNA"]]@scale.data),
  "DF20-scale-center" = colMeans(DF20_scale_center[["RNA"]]@scale.data),
  "before-DF20-after" = colMeans(scale_center_DF20_scale_center[["RNA"]]@scale.data)
)
plot.df <- reshape2::melt(cell.mean.df)
p1 <- ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "PDX scale vs. subset sequence", y = "mean expression/cell", title = "expression/cell") +
  theme_bw()
# Mean expression per gene across all cells within model DF20; centering
# within DF20 (scenarios 2 and 3) should put these at ~0.
gene.mean.df <- data.frame(
  "scale-center-DF20" = rowMeans(scale_center_DF20[["RNA"]]@scale.data),
  "DF20-scale-center" = rowMeans(DF20_scale_center[["RNA"]]@scale.data),
  "before-DF20-after" = rowMeans(scale_center_DF20_scale_center[["RNA"]]@scale.data)
)
plot.df <- reshape2::melt(gene.mean.df)
p2 <- ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "PDX scale vs. subset sequence", y = "mean expression/gene", title = "evaluate the mean expression per gene") +
  theme_bw()
# Standard deviation per gene across all cells within model DF20
sd.df <- data.frame(
  "scale-center-DF20" = apply(scale_center_DF20[["RNA"]]@scale.data, 1, sd),
  "DF20-scale-center" = apply(DF20_scale_center[["RNA"]]@scale.data, 1, sd),
  "before-DF20-after" = apply(scale_center_DF20_scale_center[["RNA"]]@scale.data, 1, sd)
)
plot.df <- reshape2::melt(sd.df)
# The labels previously said "variance" although sd() is computed above;
# relabelled to SD to match the statistic actually plotted.
p3 <- ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "PDX scale vs. subset sequence", y = "SD/gene", title = "evaluate the SD in expression per gene") +
  theme_bw()

# Display the three panels, then save the last displayed plot.
p1 + p2 + p3 + patchwork::plot_layout(nrow = 1)
ggsave(filename = "PDX_data_scaleVSsubset.png", path = "jesslyn_plots/PDX_test", width = 15, height = 5)
before.DF20.after = scale and center across all cells, subset by model, scale and center within each model again
QUESTION: Which data slot does AddModuleScore use?
IMPORTANCE: The type of data we use to score our cells will drastically affect downstream DE analysis.
HYPOTHESIS: we hypothesize that AddModuleScore uses the normalized but unscaled, uncentered “data” slot.
# Calculate hallmark module scores on the unscaled, uncentered PDX data and
# plot the distribution of OXPHOS scores. The trailing "25" in the feature
# name is the list-index suffix AddModuleScore appends to the supplied names.
PDX_orig <- AddModuleScore(PDX_orig, features = hallmark.list, name = names(hallmark.list), nbin = 25, search = TRUE)
p1 <- VlnPlot(PDX_orig, features = "HALLMARK_OXIDATIVE_PHOSPHORYLATION25") + labs(title = "oxphos orig.score distribution", x = "PDX_orig")
# Same scores computed on the scaled and centered PDX data for comparison.
PDX_scale_center <- AddModuleScore(PDX_scale_center, features = hallmark.list, name = names(hallmark.list), nbin = 25, search = TRUE)
p2 <- VlnPlot(PDX_scale_center, features = "HALLMARK_OXIDATIVE_PHOSPHORYLATION25") + labs(title = "oxphos scale.center.score distribution", x = "PDX_scale_center")
p1 + p2 + plot_layout(guides = 'collect', nrow = 1, ncol = 2)
ggsave(filename = "PDX_data_addScoreTest.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
Our current workflow: PDX -> subset -> scale and center separately
IMPORTANCE: Since we are interested in detecting differential expression across treatment conditions within each model, it is important to investigate whether there are significant differences between scoring cells before vs. after subsetting by model, and to determine which workflow would be better.
# Scenario 1: PDX -> scale -> add module score -> subset
# (PDX_scale_center was already scored above, so subsetting suffices here.)
score_DF20 <- subset(PDX_scale_center, subset = (model_ID == "DF20"))
p3 <- VlnPlot(score_DF20, features = "HALLMARK_OXIDATIVE_PHOSPHORYLATION25") + labs(title = "OXPHOS score first subset later (DF20)") + theme(plot.title = element_text(size = 8))
# Scenario 2: PDX -> subset -> scale -> add module score
DF20_score <- AddModuleScore(DF20_scale_center, features = hallmark.list, name = names(hallmark.list), nbin = 25, search = TRUE)
p4 <- VlnPlot(DF20_score, features = "HALLMARK_OXIDATIVE_PHOSPHORYLATION25") + labs(title = "OXPHOS subset first score individually (DF20)") + theme(plot.title = element_text(size = 8))
# Compare the distribution of OXPHOS scores between the two scenarios.
p3 + p4 + plot_layout(guides = 'collect', nrow = 1, ncol = 2)
ggsave(filename = "PDX_data_scoreVSsubset.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
We wonder if it would be better to “force” AddModuleScore into using the “scale.data” slot (scaled and centered by model) instead, since the counts in the “scale.data” slot are relative to all cells within DF20, instead of across all models.
IMPORTANCE: The type of data we use to score our cells may drastically affect downstream DE analysis.
HYPOTHESIS: We hypothesize that using the “scale.data” slot would be best, since it is scaled across all cells within a specific model.
# The three hallmark scores we compare throughout; suffixes are the indices
# AddModuleScore appended to the hallmark names.
hms <- c("HALLMARK_OXIDATIVE_PHOSPHORYLATION25", "HALLMARK_UNFOLDED_PROTEIN_RESPONSE33", "HALLMARK_P53_PATHWAY26")

# Scenario 1: using the "data" slot (scenario 2 from above, the DF20_score object)
p1 <- VlnPlot(DF20_score, features = hms, combine = FALSE)
p1[[1]] <- p1[[1]] + labs(title = "OXPHOS ('data')", x = "DF20")
p1[[2]] <- p1[[2]] + labs(title = "UPR ('data')", x = "DF20")
p1[[3]] <- p1[[3]] + labs(title = "p53 ('data')", x = "DF20")

# Scenario 2: "force" AddModuleScore to use the scaled values by copying the
# scale.data matrix into the "data" slot before scoring.
scale.data <- GetAssayData(object = DF20_scale_center, slot = "scale.data")
DF20_scale_center_forced <- SetAssayData(object = DF20_scale_center, slot = "data", new.data = scale.data, assay = "RNA")
DF20_scale.dataSlot <- AddModuleScore(DF20_scale_center_forced, features = hallmark.list, name = names(hallmark.list), nbin = 25, search = TRUE)
p2 <- VlnPlot(DF20_scale.dataSlot, features = hms, combine = FALSE)
p2[[1]] <- p2[[1]] + labs(title = "OXPHOS ('scale.data')", x = "DF20")
p2[[2]] <- p2[[2]] + labs(title = "UPR ('scale.data')", x = "DF20")
p2[[3]] <- p2[[3]] + labs(title = "p53 ('scale.data')", x = "DF20")
# Scenario 3: use the "data"-slot scores, mean-center each module score, and
# store the result back into the metadata under a ".centered" suffix.
# NOTE(review): columns 9:42 are assumed to be the 34 hallmark score columns
# that AddModuleScore appended to the metadata — confirm if the metadata
# layout ever changes, since a positional slice is fragile.
hm.names <- names(DF20_score@meta.data)[9:42]
hms.centered <- c("HALLMARK_OXIDATIVE_PHOSPHORYLATION25.centered", "HALLMARK_UNFOLDED_PROTEIN_RESPONSE33.centered", "HALLMARK_P53_PATHWAY26.centered")
for (i in hm.names) {
  # scale(..., scale = FALSE) only subtracts the column mean (centering).
  hm.centered <- scale(DF20_score[[i]], scale = FALSE)
  DF20_score <- AddMetaData(DF20_score, hm.centered, col.name = glue("{i}.centered"))
}
p3 <- VlnPlot(DF20_score, features = hms.centered, combine = FALSE)
p3[[1]] <- p3[[1]] + labs(title = "OXPHOS ('data' score centered)", x = "DF20")
p3[[2]] <- p3[[2]] + labs(title = "UPR ('data' score centered)", x = "DF20")
p3[[3]] <- p3[[3]] + labs(title = "p53 ('data' score centered)", x = "DF20")
#COMPARE
# One figure per hallmark (OXPHOS, UPR, p53), each showing the raw "data"
# score, the centered "data" score, and the "scale.data" score side by side.
# Each ggsave() with no plot argument saves the last displayed plot, so the
# display/save line pairs below are order-sensitive and must stay adjacent.
p1[[1]] + p3[[1]] + p2[[1]] + plot_layout(guides= 'collect', nrow = 1, ncol = 3)
ggsave(filename = "DF20_AddModuleScore_oxphos.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
p1[[2]] + p3[[2]] + p2[[2]] + plot_layout(guides= 'collect', nrow = 1, ncol = 3)
ggsave(filename = "DF20_AddModuleScore_UPR.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
p1[[3]] + p3[[3]] + p2[[3]] + plot_layout(guides= 'collect', nrow = 1, ncol = 3)
ggsave(filename = "DF20_AddModuleScore_p53.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
To look into this more, we compare the variance in module score between the scenarios.
# Compute the standard deviation of each module score under each scoring
# scenario. NOTE(review): columns 9:42 are assumed to be the raw hallmark
# scores and 43:76 the ".centered" copies added above — confirm if the
# metadata layout changes.
DF20_data.df <- DF20_score@meta.data %>% as.data.frame()
DF20_scale.data.df <- DF20_scale.dataSlot@meta.data %>% as.data.frame()
sd.df <- data.frame(
  "DF20_data" = apply(DF20_data.df[9:42], 2, sd),
  "DF20_data.centered" = apply(DF20_data.df[43:76], 2, sd),
  "DF20_scale.data" = apply(DF20_scale.data.df[9:42], 2, sd)
)
plot.df <- reshape2::melt(sd.df)
# Fixed "senario" typo; labels relabelled to SD since sd() is computed above.
ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "DF20 AddModuleScore scenario", y = "SD/module", title = "Comparing DF20 SD in module score") +
  theme_bw()
ggsave(filename = "DF20_AddModuleScore_variance.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
To further investigate this issue, we now split the violin plot (still focusing solely on DF20) by treatment status to examine if the trends we see are similar between using the data vs. scale.data slot. * We hypothesize that we should see clear trends where the MRD treatment condition differentially overexpress OXPHOS, UPR, and p53 genes, especially when using the scale.data slot.
# Scenario 1: "data"-slot scores, now split by treatment status
p1 <- VlnPlot(DF20_score, features = hms, combine = FALSE, group.by = "treatment.status")
p1[[1]] <- p1[[1]] + labs(title = "OXPHOS ('data')", x = "DF20")
p1[[2]] <- p1[[2]] + labs(title = "UPR ('data')", x = "DF20")
p1[[3]] <- p1[[3]] + labs(title = "p53 ('data')", x = "DF20")
# Scenario 2: "scale.data"-slot scores, split by treatment status
p2 <- VlnPlot(DF20_scale.dataSlot, features = hms, combine = FALSE, group.by = "treatment.status")
p2[[1]] <- p2[[1]] + labs(title = "OXPHOS ('scale.data')", x = "DF20")
p2[[2]] <- p2[[2]] + labs(title = "UPR ('scale.data')", x = "DF20")
p2[[3]] <- p2[[3]] + labs(title = "p53 ('scale.data')", x = "DF20")
# Scenario 3: "data"-slot scores centered after the fact, split by treatment
p3 <- VlnPlot(DF20_score, features = hms.centered, combine = FALSE, group.by = "treatment.status")
p3[[1]] <- p3[[1]] + labs(title = "OXPHOS ('data' score centered)", x = "DF20")
p3[[2]] <- p3[[2]] + labs(title = "UPR ('data' score centered)", x = "DF20")
p3[[3]] <- p3[[3]] + labs(title = "p53 ('data' score centered)", x = "DF20")
# COMPARE: one figure per hallmark. ggsave() with no plot argument saves the
# last displayed plot, so each display/save pair is order-sensitive.
p1[[1]] + p3[[1]] + p2[[1]] + plot_layout(guides = 'collect', nrow = 1, ncol = 3)
ggsave(filename = "DF20_AddModuleScore_oxphosByT.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
p1[[2]] + p3[[2]] + p2[[2]] + plot_layout(guides = 'collect', nrow = 1, ncol = 3)
ggsave(filename = "DF20_AddModuleScore_UPRByT.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
p1[[3]] + p3[[3]] + p2[[3]] + plot_layout(guides = 'collect', nrow = 1, ncol = 3)
ggsave(filename = "DF20_AddModuleScore_p53ByT.png", path = "jesslyn_plots/PDX_test", width = 10, height = 5)
Although similar trends are seen between the three scenarios, there are also some inconsistencies between them. For example: * It seems like the trends are more obvious when calculating module score with the scale.data slot. - This is what we hoped/ expected because the scale.data slot scales the counts across cells within the same model, rather than across all models (which would have masked intra-model differences due to the large inter-model differences) * For the UPR plot, it seems like MRD has the highest expression level when using the “data” slot, but not when using the “scale.data” slot. This is weird because scaling the data shouldn’t affect the relative position of the count data. For example, if a cell in treatment condition A expresses a higher level of gene A than condition B, scaling the data and controlling for the standard deviation should still make condition A express higher levels of gene A than condition B. * The results from this graph therefore tell us that there are significant differences depending on the slot we use for AddModuleScore, considering how the two slots resulted in different trends.
QUESTION: Which data slot is best for FindMarkers?
IMPORTANCE: Possible that we will get different DE genes, LogFC, and pvalues, depending on the data slot we use. This may drastically affect downstream DE Analysis such as GSEA and Volcano Plots, which both rely on the FindMarkers function.
HYPOTHESIS: We hypothesize that it would be better to use the “data” slot because the values in the “scale.data” slot are already z-scores.
# DE between MRD and vehicle using the default "data" slot. Genes are ranked
# by adjusted p value (ties broken by descending logFC) and flagged as DE at
# |avg_logFC| > 0.5 and padj < 0.05.
data_slot <- FindMarkers(DF20_score, group.by = "treatment.status", ident.1 = "MRD", ident.2 = "vehicle", logfc.threshold = 0) %>%
  rownames_to_column() %>%
  arrange(p_val_adj, -avg_logFC) %>%
  select(rowname, avg_logFC, p_val_adj)
data_slot <- data_slot %>%
  mutate("data.rank" = seq_len(nrow(data_slot)),
         "data.DE" = abs(avg_logFC) > 0.5 & p_val_adj < 0.05)
# Same comparison on the "scale.data" slot; FindMarkers reports avg_diff
# (not avg_logFC) for this slot.
scale.data.slot <- FindMarkers(DF20_score, group.by = "treatment.status", slot = "scale.data", ident.1 = "MRD", ident.2 = "vehicle", logfc.threshold = 0) %>%
  rownames_to_column() %>%
  arrange(p_val_adj, -avg_diff) %>%
  select(rowname, avg_diff, p_val_adj)
scale.data.slot <- scale.data.slot %>%
  mutate("scale_data.rank" = seq_len(nrow(scale.data.slot)),
         "scale.data.DE" = abs(avg_diff) > 0.5 & p_val_adj < 0.05)
both <- merge(data_slot, scale.data.slot, by = "rowname")
both <- both %>% mutate("both.DE" = data.DE & scale.data.DE)
identical(data_slot$rowname, scale.data.slot$rowname)
## [1] FALSE
# Fraction of rank positions where the two orderings disagree.
# NOTE(review): 13893 is a hard-coded gene count — presumably
# nrow(data_slot); replace with nrow() once confirmed.
length(which(data_slot$rowname != scale.data.slot$rowname))/13893
## [1] 0.9512704
# Compare ranks between the two slots; identical orderings would fall on the
# y = x line, and points are coloured by whether both slots call the gene DE.
ranks.plot <- ggplot(both, aes(x = data.rank, y = scale_data.rank)) +
  geom_point(aes(alpha = 0.5, colour = factor(both.DE))) +
  geom_abline() +
  labs(title = "Comparing gene ranks when using data vs. scale.data",
       x = "data", y = "scale.data",
       colour = "DE for both slots", alpha = "Transparency") +
  theme(plot.title = element_text(size = 8)) +
  stat_cor(method = "pearson")
ggsave(plot = ranks.plot, "DF20_FindMarkers_slot_ranks.png", path = "jesslyn_plots/PDX_test")

# Compare adjusted p values between the two slots (.x = data, .y = scale.data
# suffixes come from the merge above).
padj.plot <- ggscatter(both, x = "p_val_adj.x", y = "p_val_adj.y",
                       add = "reg.line", add.params = list(color = "grey")) +
  labs(title = "Comparing the padj values of using data vs. scale.data",
       x = "data", y = "scale.data") +
  theme(plot.title = element_text(size = 8)) +
  stat_cor(method = "pearson")
ggsave(plot = padj.plot, "DF20_FindMarkers_slot_padj.png", path = "jesslyn_plots/PDX_test")

# Compare avg_logFC (data) against avg_diff (scale.data) per gene.
logFC.plot <- ggplot(both, aes(x = avg_logFC, y = avg_diff)) +
  geom_point(aes(alpha = 0.5, colour = factor(both.DE))) +
  labs(title = "Comparing the logFC and avg_diff values of using data vs. scale.data",
       x = "data", y = "scale.data",
       colour = "DE for both slots", alpha = "Transparency") +
  theme(plot.title = element_text(size = 8))
ggsave(plot = logFC.plot, "DF20_FindMarkers_slot_logFC.png", path = "jesslyn_plots/PDX_test")

ranks.plot + padj.plot + logFC.plot + plot_layout(guides = 'collect')
We examine how padj and logFC (or avg_diff) values correlate between the two scenarios: - The padj values associated with each gene are identical regardless of the slot used (correlation coefficient = 1) - Makes sense because the wilcoxon rank sum test is used here, so the unscaled vs. scaled count data should be given the same rank in each scenario to calculate the p values. - The ranks therefore differ depending on the slot used for FindMarkers due to avg_logFC (or avg_diff) - Unexpected because although the scale.data slot contains z-scores, the scaling shouldn’t have changed the relative expression across cells for a single gene. For example, if one condition expresses more of gene A than the other condition, while they express gene B at the same level, scaling the counts should not change the fact that gene A is differentially expressed while gene B is not. The avg_logFC of gene A relative to gene B should therefore be identical to the avg_diff of gene A relative to gene B. - Although it could be possible that FindMarkers may have scaled the counts in the scale.data slot again, which may have tightened the spread of the data even more and obscured differences between conditions, the fact that FindMarkers reports values in avg_diff instead of avg_logFC implies that the function should already know that it is working with z-scores when slot is set to scale.data. - We therefore investigate how the distribution of avg_logFC when using the “data” slot compares to the distribution of avg_diff when using the “scale.data” slot for FindMarkers.
# Compare the overall distributions of avg_logFC ("data") and avg_diff
# ("scale.data"). Note the two tables are sorted differently, so rows are not
# gene-aligned here — only the marginal distributions are comparable.
fc.diff <- data.frame(
  "avg_logFC" = data_slot$avg_logFC,
  "avg_diff" = scale.data.slot$avg_diff
)
plot.df <- reshape2::melt(fc.diff)
ggplot(plot.df, aes(x = variable, y = value, fill = variable)) +
  geom_violin() +
  labs(x = "'data' vs. 'scale.data'", y = "logFC or diff", title = "Comparing the distribution of avg_logFC vs. avg_diff") +
  theme_bw()
ggsave("DF20_FindMarkers_slot_logFC2.png", path = "jesslyn_plots/PDX_test")
We now build MAPlots to try to determine whether avg_logFC or avg_diff would be a better metric to use.
# MA-style plots: per-gene mean normalized expression ("data" slot) on x,
# against avg_logFC and avg_diff on y.
MA.df <- data.frame(
  "rowname" = rownames(DF20_score[["RNA"]]@data),
  "normalized.exp" = rowMeans(DF20_score[["RNA"]]@data)
)
MA.df <- merge(MA.df, both, by = "rowname")
FC <- ggplot(MA.df, aes(x = normalized.exp, y = avg_logFC)) + geom_point() + geom_hline(yintercept = 0, colour = "grey") + labs(x = "Log2 Mean Expression", y = "logFC", title = "Mean expression (from data slot) vs. logFC")
# NOTE(review): "diff" shadows base::diff in this session; kept for
# compatibility with the existing workflow.
diff <- ggplot(MA.df, aes(x = normalized.exp, y = avg_diff)) + geom_point() + geom_hline(yintercept = 0, colour = "grey") + labs(x = "Log2 Mean Expression", title = "Mean expression (from data slot) vs. avg_diff")
FC + diff
IMPORTANCE: Different statistical tests use different approximations and assumptions, which may slightly affect the resulting DE genes they detect.
HYPOTHESIS: We hypothesize that the wilcox rank sum test would be the best test to use.
# Run the MRD-vs-vehicle comparison with a given statistical test, rank genes
# by adjusted p value (ties broken by descending logFC), and prefix the
# result columns with the test name. Replaces four near-identical
# copy-pasted pipelines.
run_de_test <- function(test_name, prefix) {
  res <- FindMarkers(DF20_score, group.by = "treatment.status",
                     ident.1 = "MRD", ident.2 = "vehicle",
                     logfc.threshold = 0, test.use = test_name) %>%
    rownames_to_column() %>%
    arrange(p_val_adj, -avg_logFC) %>%
    select(rowname, avg_logFC, p_val_adj)
  res <- res %>% mutate(rank = seq_len(nrow(res)))
  colnames(res) <- c("rowname", paste0(prefix, ".logFC"),
                     paste0(prefix, ".padj"), paste0(prefix, ".rank"))
  res
}

wilcox <- run_de_test("wilcox", "wilcox")
# NOTE(review): "t.test" shadows stats::t.test in this session; name kept so
# downstream merges keep working.
t.test <- run_de_test("t", "t")
MAST <- run_de_test("MAST", "MAST")
LR <- run_de_test("LR", "LR")
# Merge the four test results into one table for pairwise comparison.
wilcox.t <- merge(wilcox, t.test, by = "rowname")
MAST.LR <- merge(MAST, LR, by = "rowname")
all <- merge(wilcox.t, MAST.LR, by = "rowname")
# Fraction of rank positions at which the ordered gene lists disagree, for
# every pair of tests. NOTE(review): 13893 is a hard-coded total gene count —
# presumably nrow(wilcox); replace with nrow() once confirmed.
which.diff <- data.frame(
  "wilcox.vs.t" = length(which(wilcox$rowname != t.test$rowname))/13893,
  "wilcox.vs.MAST" = length(which(wilcox$rowname != MAST$rowname))/13893,
  "wilcox.vs.LR" = length(which(wilcox$rowname != LR$rowname))/13893,
  "t.vs.MAST" = length(which(t.test$rowname != MAST$rowname))/13893,
  "t.vs.LR" = length(which(t.test$rowname != LR$rowname))/13893,
  "MAST.vs.LR" = length(which(MAST$rowname != LR$rowname))/13893
)
rownames(which.diff) <- "% Different"
which.diff
## wilcox.vs.t wilcox.vs.MAST wilcox.vs.LR t.vs.MAST t.vs.LR MAST.vs.LR
## % Different 0.9993522 0.9947456 0.9994242 0.9994961 0.9978406 0.9961851
# Compare padj values between the four tests; add a grey y = x reference
# line to every lower-triangle panel (replaces six copy-pasted assignments).
all.padj <- ggpairs(all, columns = c("wilcox.padj", "t.padj", "MAST.padj", "LR.padj"), diag = "blank") + labs(title = "Comparing padj values between statistical tests")
for (row_i in 2:4) {
  for (col_i in seq_len(row_i - 1)) {
    all.padj[row_i, col_i] <- all.padj[row_i, col_i] + geom_abline(colour = "grey")
  }
}
all.padj
ggsave(plot = all.padj, "DF20_FindMarkers_test_padj.png", path = "jesslyn_plots/PDX_test", height = 7.15, width = 7.15)
# Compare logFC values between the four tests; add a grey y = x reference
# line to every lower-triangle panel (replaces six copy-pasted assignments).
all.logFC <- ggpairs(all, columns = c("wilcox.logFC", "t.logFC", "MAST.logFC", "LR.logFC"), diag = "blank") + labs(title = "Comparing logFC values between statistical tests")
for (row_i in 2:4) {
  for (col_i in seq_len(row_i - 1)) {
    all.logFC[row_i, col_i] <- all.logFC[row_i, col_i] + geom_abline(colour = "grey")
  }
}
all.logFC
ggsave(plot = all.logFC, "DF20_FindMarkers_test_logFC.png", path = "jesslyn_plots/PDX_test", height = 7.15, width = 7.15)
# Compare gene ranks between the four tests; add a grey y = x reference line
# to every lower-triangle panel (replaces six copy-pasted assignments).
all.ranks <- ggpairs(all, columns = c("wilcox.rank", "t.rank", "MAST.rank", "LR.rank"), diag = "blank") + labs(title = "Comparing gene ranks between statistical tests")
for (row_i in 2:4) {
  for (col_i in seq_len(row_i - 1)) {
    all.ranks[row_i, col_i] <- all.ranks[row_i, col_i] + geom_abline(colour = "grey")
  }
}
all.ranks
ggsave(plot = all.ranks, "DF20_FindMarkers_test_ranks.png", path = "jesslyn_plots/PDX_test", height = 7.15, width = 7.15)